for ( i = 0; i < nr_pages; i++ )
{
- mcl[i].op = __HYPERVISOR_update_va_mapping;
- mcl[i].args[0] = MMAP_VADDR(idx, i);
- mcl[i].args[1] = 0;
- mcl[i].args[2] = 0;
+ MULTI_update_va_mapping(mcl+i, MMAP_VADDR(idx, i),
+ __pte(0), 0);
}
mcl[nr_pages-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
for ( i = 0; i < nseg; i++ )
{
- mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
- mcl[i].args[0] = MMAP_VADDR(pending_idx, i);
- mcl[i].args[1] = (seg[i].buf & PAGE_MASK) | remap_prot;
- mcl[i].args[2] = 0;
- mcl[i].args[3] = blkif->domid;
+ MULTI_update_va_mapping_otherdomain(
+ mcl+i, MMAP_VADDR(pending_idx, i),
+ pfn_pte_ma(seg[i].buf >> PAGE_SHIFT, remap_prot),
+ 0, blkif->domid);
#ifdef CONFIG_XEN_BLKDEV_TAP_BE
if ( blkif->is_blktap )
mcl[i].args[3] = ID_TO_DOM(req->id);
* Heed the comment in pgtable-2level.h:pte_page(). :-)
*/
phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
-
- mcl->op = __HYPERVISOR_update_va_mapping;
- mcl->args[0] = vdata;
- mcl->args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
- mcl->args[2] = 0;
+
+ MULTI_update_va_mapping(mcl, vdata,
+ pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
mcl++;
mcl->op = __HYPERVISOR_mmuext_op;
while ( dc != dp )
{
pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
- mcl[0].op = __HYPERVISOR_update_va_mapping;
- mcl[0].args[0] = MMAP_VADDR(pending_idx);
- mcl[0].args[1] = 0;
- mcl[0].args[2] = 0;
+ MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx),
+ __pte(0), 0);
mcl++;
}
/* Packets passed to netif_rx() must have some headroom. */
skb_reserve(skb, 16);
- mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain;
- mcl[0].args[0] = MMAP_VADDR(pending_idx);
- mcl[0].args[1] = (txreq.addr & PAGE_MASK) | __PAGE_KERNEL;
- mcl[0].args[2] = 0;
- mcl[0].args[3] = netif->domid;
+ MULTI_update_va_mapping_otherdomain(
+ mcl, MMAP_VADDR(pending_idx),
+ pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL),
+ 0, netif->domid);
mcl++;
memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT]
= INVALID_P2M_ENTRY;
- rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
- rx_mcl[i].args[0] = (unsigned long)skb->head;
- rx_mcl[i].args[1] = 0;
- rx_mcl[i].args[2] = 0;
+ MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
+ __pte(0), 0);
}
/* After all PTEs have been zapped we blow away stale TLB entries. */
mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
mmu->val = __pa(skb->head) >> PAGE_SHIFT;
mmu++;
- mcl->op = __HYPERVISOR_update_va_mapping;
- mcl->args[0] = (unsigned long)skb->head;
- mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
- mcl->args[2] = 0;
+ MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
+ pfn_pte_ma(rx->addr >> PAGE_SHIFT, PAGE_KERNEL), 0);
mcl++;
phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
for ( i = 0; i < nr_pages; i++ )
{
- mcl[i].op = __HYPERVISOR_update_va_mapping;
- mcl[i].args[0] = MMAP_VADDR(idx, i);
- mcl[i].args[1] = 0;
- mcl[i].args[2] = 0;
+ MULTI_update_va_mapping(mcl+i, MMAP_VADDR(idx, i),
+ __pte(0), 0);
}
mcl[nr_pages-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
for ( i = 0, offset = 0; offset < req->length;
i++, offset += PAGE_SIZE )
{
- mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
- mcl[i].args[0] = MMAP_VADDR(pending_idx, i);
- mcl[i].args[1] = ((buffer_mach & PAGE_MASK) + offset) | remap_prot;
- mcl[i].args[2] = 0;
- mcl[i].args[3] = up->domid;
+ MULTI_update_va_mapping_otherdomain(
+ mcl+i, MMAP_VADDR(pending_idx, i),
+ pfn_pte_ma(buffer_mach >> PAGE_SHIFT, remap_prot),
+ 0, up->domid);
phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
FOREIGN_FRAME((buffer_mach + offset) >> PAGE_SHIFT);
if ( req->pipe_type == 0 && req->num_iso > 0 ) /* Maybe schedule ISO... */
{
/* Map in ISO schedule, if necessary. */
- mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
- mcl[i].args[0] = MMAP_VADDR(pending_idx, i);
- mcl[i].args[1] = (req->iso_schedule & PAGE_MASK) | remap_prot;
- mcl[i].args[2] = 0;
- mcl[i].args[3] = up->domid;
+ MULTI_update_va_mapping_otherdomain(
+ mcl+i, MMAP_VADDR(pending_idx, i),
+ pfn_pte_ma(req->iso_schedule >> PAGE_SHIFT, remap_prot),
+ 0, up->domid);
phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
FOREIGN_FRAME(req->iso_schedule >> PAGE_SHIFT);
unsigned long va, pte_t new_val, unsigned long flags)
{
int ret;
- unsigned long ign1, ign2, ign3;
+ unsigned long ign1, ign2, ign3, ign4;
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
: "0" (__HYPERVISOR_update_va_mapping),
- "1" (va), "2" ((new_val).pte_low), "3" (flags)
+ "1" (va), "2" ((new_val).pte_low),
+#ifdef CONFIG_X86_PAE
+ "3" ((new_val).pte_high),
+#else
+ "3" (0),
+#endif
+ "4" (flags)
: "memory" );
if ( unlikely(ret < 0) )
unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
{
int ret;
- unsigned long ign1, ign2, ign3, ign4;
+ unsigned long ign1, ign2, ign3, ign4, ign5;
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3),
+ "=S" (ign4), "=D" (ign5)
: "0" (__HYPERVISOR_update_va_mapping_otherdomain),
- "1" (va), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
+ "1" (va), "2" ((new_val).pte_low),
+#ifdef CONFIG_X86_PAE
+ "3" ((new_val).pte_high),
+#else
+ "3" (0),
+#endif
+ "4" (flags), "5" (domid) :
"memory" );
return ret;
#include <asm/hypercall.h>
+/*
+ * Queue an update_va_mapping hypercall as one entry of a multicall
+ * batch.  The multicall argument layout is arch-dependent: x86/64
+ * passes the whole PTE in one argument slot with 'flags' following;
+ * 32-bit PAE must split the 64-bit PTE into pte_low/pte_high, which
+ * pushes 'flags' down to args[3].
+ */
+static inline void
+MULTI_update_va_mapping(
+ multicall_entry_t *mcl, unsigned long va,
+ pte_t new_val, unsigned long flags)
+{
+ mcl->op = __HYPERVISOR_update_va_mapping;
+ mcl->args[0] = va;
+#if defined(CONFIG_X86_64)
+ mcl->args[1] = new_val.pte;
+ mcl->args[2] = flags;
+#elif defined(CONFIG_X86_PAE)
+ mcl->args[1] = new_val.pte_low;
+ mcl->args[2] = new_val.pte_high;
+ mcl->args[3] = flags;
+#else
+ /* Non-PAE 32-bit: the high word of the 64-bit PTE value is zero. */
+ mcl->args[1] = new_val.pte_low;
+ mcl->args[2] = 0;
+ mcl->args[3] = flags;
+#endif
+}
+
+/*
+ * Queue an update_va_mapping_otherdomain hypercall in a multicall
+ * batch.  Same arch-dependent PTE/flags marshalling as
+ * MULTI_update_va_mapping(); the target 'domid' is always passed in
+ * the argument slot immediately after 'flags'.
+ */
+static inline void
+MULTI_update_va_mapping_otherdomain(
+ multicall_entry_t *mcl, unsigned long va,
+ pte_t new_val, unsigned long flags, domid_t domid)
+{
+ mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
+ mcl->args[0] = va;
+#if defined(CONFIG_X86_64)
+ mcl->args[1] = new_val.pte;
+ mcl->args[2] = flags;
+ mcl->args[3] = domid;
+#elif defined(CONFIG_X86_PAE)
+ mcl->args[1] = new_val.pte_low;
+ mcl->args[2] = new_val.pte_high;
+ mcl->args[3] = flags;
+ mcl->args[4] = domid;
+#else
+ /* Non-PAE 32-bit: the high word of the 64-bit PTE value is zero. */
+ mcl->args[1] = new_val.pte_low;
+ mcl->args[2] = 0;
+ mcl->args[3] = flags;
+ mcl->args[4] = domid;
+#endif
+}
+
#endif /* __HYPERVISOR_H__ */
}
va = map_domain_page_with_cache(mfn, &mapcache);
- va = (void *)((unsigned long)va + (req.ptr & ~PAGE_MASK));
+ va = (void *)((unsigned long)va +
+ (unsigned long)(req.ptr & ~PAGE_MASK));
page = &frame_table[mfn];
switch ( (type_info = page->u.inuse.type_info) & PGT_type_mask )
break;
default:
- MEM_LOG("Invalid page update command %lx", req.ptr);
+ MEM_LOG("Invalid page update command %x", cmd);
break;
}
}
-int do_update_va_mapping(unsigned long va,
- unsigned long val32,
+int do_update_va_mapping(unsigned long va, u64 val64,
unsigned long flags)
{
- l1_pgentry_t val = l1e_from_intpte(val32);
+ l1_pgentry_t val = l1e_from_intpte(val64);
struct vcpu *v = current;
struct domain *d = v->domain;
unsigned int cpu = v->processor;
return rc;
}
-int do_update_va_mapping_otherdomain(unsigned long va,
- unsigned long val32,
+int do_update_va_mapping_otherdomain(unsigned long va, u64 val64,
unsigned long flags,
domid_t domid)
{
return -ESRCH;
}
- rc = do_update_va_mapping(va, val32, flags);
+ rc = do_update_va_mapping(va, val64, flags);
return rc;
}
* This makes sure that old versions of dom0 tools will stop working in a
* well-defined way (rather than crashing the machine, for instance).
*/
-#define DOM0_INTERFACE_VERSION 0xAAAA100A
+#define DOM0_INTERFACE_VERSION 0xAAAA100B
/************************************************************************/
*/
typedef struct
{
- memory_t ptr; /* Machine address of PTE. */
- memory_t val; /* New contents of PTE. */
+ u64 ptr; /* Machine address of PTE. */
+ u64 val; /* New contents of PTE. */
} mmu_update_t;
/*